schedule_tail is now an indirect function call (via thread.schedule_tail) on the x86 architecture.
3ddb79c3r9-31dIsewPV3P3i8HALsQ xen/include/asm-x86/delay.h
3ddb79c34BFiXjBJ_cCKB0aCsV1IDw xen/include/asm-x86/desc.h
40715b2dTokMLYGSuD58BnxOqyWVew xen/include/asm-x86/div64.h
-40f2b4a2hC3HtChu-ArD8LyojxWMjg xen/include/asm-x86/domain.h
3e20b82fl1jmQiKdLy7fxMcutfpjWA xen/include/asm-x86/domain_page.h
3ddb79c3NU8Zy40OTrq3D-i30Y3t4A xen/include/asm-x86/fixmap.h
3e2d29944GI24gf7vOP_7x8EyuqxeA xen/include/asm-x86/flushtlb.h
3eb165e0eawr3R-p2ZQtSdLWtLRN_A xen/include/xen/console.h
3ddb79c1V44RD26YqCUm-kqIupM37A xen/include/xen/ctype.h
3ddb79c05DdHQ0UxX_jKsXdR4QlMCA xen/include/xen/delay.h
+40f2b4a2hC3HtChu-ArD8LyojxWMjg xen/include/xen/domain.h
3ddb79c2O729EttZTYu1c8LcsUO_GQ xen/include/xen/elf.h
3ddb79c0HIghfBF8zFUdmXhOU8i6hA xen/include/xen/errno.h
3ddb79c1W0lQca8gRV7sN6j3iY4Luw xen/include/xen/event.h
memcmp:
idt_tables:
new_thread:
-.globl switch_to, continue_nonidle_task, __get_user_1, paging_init, trap_init
+.globl switch_to, __get_user_1, paging_init, trap_init
switch_to:
-continue_nonidle_task:
__get_user_1:
paging_init:
trap_init:
#define round_pgup(_p) (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
#define round_pgdown(_p) ((_p)&PAGE_MASK)
-int hlt_counter;
-
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-
-void enable_hlt(void)
-{
- hlt_counter--;
-}
-
-/*
- * We use this if we don't have any better
- * idle routine..
- */
static void default_idle(void)
{
- if ( hlt_counter == 0 )
- {
- __cli();
- if ( !softirq_pending(smp_processor_id()) )
- safe_halt();
- else
- __sti();
- }
+ __cli();
+ if ( !softirq_pending(smp_processor_id()) )
+ safe_halt();
+ else
+ __sti();
}
-void continue_cpu_idle_loop(void)
+static void idle_loop(void)
{
int cpu = smp_processor_id();
for ( ; ; )
smp_mb();
init_idle();
- continue_cpu_idle_loop();
+ idle_loop();
}
static long no_idt[2];
free_xenheap_page((unsigned long)d->mm.perdomain_pt);
}
+static void continue_idle_task(struct domain *d)
+{
+ reset_stack_and_jump(idle_loop);
+}
+
+static void continue_nonidle_task(struct domain *d)
+{
+ reset_stack_and_jump(ret_from_intr);
+}
+
void arch_do_createdomain(struct domain *d)
{
- d->shared_info = (void *)alloc_xenheap_page();
- memset(d->shared_info, 0, PAGE_SIZE);
- d->shared_info->arch.mfn_to_pfn_start =
- virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
- SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
- machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
- PAGE_SHIFT] = INVALID_P2M_ENTRY;
-
- d->mm.perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
- memset(d->mm.perdomain_pt, 0, PAGE_SIZE);
- machine_to_phys_mapping[virt_to_phys(d->mm.perdomain_pt) >>
- PAGE_SHIFT] = INVALID_P2M_ENTRY;
+#ifdef ARCH_HAS_FAST_TRAP
+ SET_DEFAULT_FAST_TRAP(&d->thread);
+#endif
+
+ if ( d->id == IDLE_DOMAIN_ID )
+ {
+ d->thread.schedule_tail = continue_idle_task;
+ }
+ else
+ {
+ d->thread.schedule_tail = continue_nonidle_task;
+
+ d->shared_info = (void *)alloc_xenheap_page();
+ memset(d->shared_info, 0, PAGE_SIZE);
+ d->shared_info->arch.mfn_to_pfn_start =
+ virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
+ SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
+ machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
+ PAGE_SHIFT] = INVALID_P2M_ENTRY;
+
+ d->mm.perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
+ memset(d->mm.perdomain_pt, 0, PAGE_SIZE);
+ machine_to_phys_mapping[virt_to_phys(d->mm.perdomain_pt) >>
+ PAGE_SHIFT] = INVALID_P2M_ENTRY;
+ }
}
int arch_final_setup_guestos(struct domain *d, full_execution_context_t *c)
sizeof(d->thread.traps));
#ifdef ARCH_HAS_FAST_TRAP
- SET_DEFAULT_FAST_TRAP(&d->thread);
if ( (rc = (int)set_fast_trap(d, c->fast_trap_idx)) != 0 )
return rc;
#endif
__save_flags(ec->eflags);
ec->eflags |= X86_EFLAGS_IF;
-
- /* No fast trap at start of day. */
- SET_DEFAULT_FAST_TRAP(&d->thread);
}
sort_exception_tables();
+ arch_do_createdomain(current);
+
/* Tell the PCI layer not to allocate too close to the RAM area.. */
low_mem_size = ((max_page << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
if ( low_mem_size > pci_mem_start ) pci_mem_start = low_mem_size;
map_cpu_to_boot_apicid(cpu, apicid);
-#if defined(__i386__)
- SET_DEFAULT_FAST_TRAP(&idle->thread);
-#endif
-
idle_task[cpu] = idle;
/* start_eip had better be page-aligned! */
andl $~3,reg; \
movl (reg),reg;
-ENTRY(continue_nonidle_task)
- GET_CURRENT(%ebx)
- jmp test_all_events
-
ALIGN
restore_all_guest:
testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
/* Per-domain PCI-device list. */
spin_lock_init(&d->pcidev_lock);
INIT_LIST_HEAD(&d->pcidev_list);
+
+ if ( (d->id != IDLE_DOMAIN_ID) &&
+ ((init_event_channels(d) != 0) || (grant_table_create(d) != 0)) )
+ {
+ destroy_event_channels(d);
+ free_domain_struct(d);
+ return NULL;
+ }
+
+ arch_do_createdomain(d);
+
+ sched_add_domain(d);
if ( d->id != IDLE_DOMAIN_ID )
{
- if ( (init_event_channels(d) != 0) || (grant_table_create(d) != 0) )
- {
- destroy_event_channels(d);
- free_domain_struct(d);
- return NULL;
- }
-
- arch_do_createdomain(d);
-
- sched_add_domain(d);
-
write_lock(&domlist_lock);
pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_list )
domain_hash[DOMAIN_HASH(dom_id)] = d;
write_unlock(&domlist_lock);
}
- else
- {
- sched_add_domain(d);
- }
return d;
}
+++ /dev/null
-
-#ifndef __ASM_X86_DOMAIN_H__
-#define __ASM_X86_DOMAIN_H__
-
-extern void arch_do_createdomain(struct domain *d);
-
-extern int arch_final_setup_guestos(
- struct domain *d, full_execution_context_t *c);
-
-extern void free_perdomain_pt(struct domain *d);
-
-extern void domain_relinquish_memory(struct domain *d);
-
-#endif /* __ASM_X86_DOMAIN_H__ */
/* general user-visible register state */
execution_context_t user_ctxt;
+ void (*schedule_tail) (struct domain *);
+
/*
* Return vectors pushed to us by guest OS.
* The stack frame for events is exactly that of an x86 hardware interrupt.
long set_fast_trap(struct domain *p, int idx);
-#define INIT_THREAD { fast_trap_idx: 0x20 }
-
-#elif defined(__x86_64__)
+#endif
#define INIT_THREAD { 0 }
-#endif /* __x86_64__ */
-
extern int gpf_emulate_4gb(struct xen_regs *regs);
struct mm_struct {
return !!(flags & (1<<9)); /* EFLAGS_IF */
}
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
#define BROKEN_ACPI_Sx 0x0001
#define BROKEN_INIT_AFTER_S1 0x0002
return p;
}
-#define schedule_tail(_p) \
+#define reset_stack_and_jump(__fn) \
__asm__ __volatile__ ( \
- "andl %%esp,%0; addl %2,%0; movl %0,%%esp; jmp *%1" \
- : : "r" (~(STACK_SIZE-1)), \
- "r" (unlikely(is_idle_task((_p))) ? \
- continue_cpu_idle_loop : \
- continue_nonidle_task), \
- "i" (STACK_SIZE-STACK_RESERVED) )
+ "movl %0,%%esp; jmp "STR(__fn) \
+ : : "r" (get_execution_context()) )
+#define schedule_tail(_d) ((_d)->thread.schedule_tail)(_d)
#endif /* _X86_CURRENT_H */
return p;
}
-#define schedule_tail(_p) \
+#define reset_stack_and_jump(__fn) \
__asm__ __volatile__ ( \
- "andq %%rsp,%0; addq %2,%0; movq %0,%%rsp; jmp *%1" \
- : : "r" (~(STACK_SIZE-1)), \
- "r" (unlikely(is_idle_task((_p))) ? \
- continue_cpu_idle_loop : \
- continue_nonidle_task), \
- "i" (STACK_SIZE-STACK_RESERVED) )
+ "movq %0,%%rsp; jmp "STR(__fn) \
+ : : "r" (get_execution_context()) )
+#define schedule_tail(_d) ((_d)->thread.schedule_tail)(_d)
#else
--- /dev/null
+
+#ifndef __XEN_DOMAIN_H__
+#define __XEN_DOMAIN_H__
+
+
+
+/*
+ * Arch-specifics.
+ */
+
+extern void arch_do_createdomain(struct domain *d);
+
+extern int arch_final_setup_guestos(
+ struct domain *d, full_execution_context_t *c);
+
+extern void free_perdomain_pt(struct domain *d);
+
+extern void domain_relinquish_memory(struct domain *d);
+
+#endif /* __XEN_DOMAIN_H__ */
int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */
void startup_cpu_idle_loop(void);
-void continue_cpu_idle_loop(void);
-
-void continue_nonidle_task(void);
unsigned long hypercall_create_continuation(
unsigned int op, unsigned int nr_args, ...);
#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
#include <xen/slab.h>
-#include <asm/domain.h>
+#include <xen/domain.h>
#endif /* __SCHED_H__ */